home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
gc
/
GCconstrained_malloc.c
< prev
next >
Wrap
C/C++ Source or Header
|
1991-12-13
|
4KB
|
141 lines
/*
* Allocate a pointerful block of at least *size bytes, starting within
* maxDist bytes of address. Returns NIL if unsuccessful.
* May take a long time, especially if unsuccessful.
* If successful, the actual size is returned in *size.
* The address argument should be an address in or below the heap.
*/
# include <xr/GC.h>
# include <xr/GCPrivate.h>
XR_Pointer XR_ConstrainedMalloc(size, address, maxDist)
unsigned long * size;		/* In: minimum byte count.  Out: actual size granted. */
XR_Pointer address;		/* Anchor address; should be in or below the heap.    */
unsigned long maxDist;		/* Result must start within maxDist bytes of address. */
{
    register unsigned long lw;
    struct obj * result = 0;	/* FIX: was uninitialized.  The "hbp == 0" path   */
				/* below jumps to done: without assigning result, */
				/* so on heap exhaustion the function returned an */
				/* indeterminate pointer instead of NIL.          */
    register struct obj * op;
    register struct obj * last;
    register struct obj * lb;	/* Lowest acceptable object address.  */
    register struct obj * ub;	/* Highest acceptable object address. */
    struct obj * garbage = 0;	/* List (linked through 0th comp) of */
				/* unusable objects we already allocated */
    int initial_gc_no = GC_gc_no;	/* Detect whether a GC intervenes. */

    if (address > (XR_Pointer)GC_heaplim) {
        return(0);
    }
    /* Compute the acceptable range [lb, ub]; clamp the lower bound so */
    /* the subtraction cannot wrap below address 0.                    */
    if (((unsigned long)address) < maxDist) {
        lb = 0;
    } else {
        lb = (struct obj *)(((unsigned long)address) - maxDist);
    }
    ub = (struct obj *)(((unsigned long)address) + maxDist);
#   define in_range(p) ((p) <= ub && (p) >= lb)
    XR_MonitorEntry(&GC_allocate_ml);
    for(;;) {
	/* First try small object free lists: */
	for (lw = BYTES_TO_WORDS(*size + (sizeof (word)) -1);
	     lw <= MAXOBJSZ; lw++) {
	    /* NOTE(review): with MERGE_SIZES this overwrites the loop   */
	    /* variable with the mapped size class; presumably           */
	    /* GC_size_map[lw] >= lw so the loop still advances —        */
	    /* confirm against GCPrivate.h.                              */
#           ifdef MERGE_SIZES
		lw = GC_size_map[lw];
#           endif
	    if (op = objfreelist[lw]) {
		if (in_range(op)) {
		    /* Head of the free list qualifies: unlink it. */
		    objfreelist[lw] = get_obj_link(op);
		    op->obj_component[0] = 0;
		    result = op;
		    *size = WORDS_TO_BYTES(lw);
		    goto done;
		} else {
		    /* Scan the remainder of the list for an in-range */
		    /* entry, unlinking it from its predecessor.      */
		    last = op;
		    while (op = get_obj_link(last)) {
			if (in_range(op)) {
			    set_obj_link(last, get_obj_link(op));
			    op -> obj_component[0] = 0;
			    result = op;
			    *size = WORDS_TO_BYTES(lw);
			    goto done;
			} else {
			    last = op;
			}
		    }
		}
	    }
	}
	/* Repeatedly try to allocate heap blocks until we get an appropriate */
	/* one, or we run out.  Explicitly discard unused blocks once we're   */
	/* done.                                                              */
	{
	    extern struct hblk * GC_savhbp;	/* Start point for heap block */
						/* allocator.                 */
	    struct hblk * hbp;
	    struct hblk * prevhbp;

	    /* Round the request up to a whole number of heap blocks,  */
	    /* then express the usable (header-excluded) size in words. */
	    lw = (*size + HDR_BYTES + HBLKSIZE-1) & ~HBLKMASK;
	    lw -= HDR_BYTES;
	    lw = BYTES_TO_WORDS(lw);
	    /* Try to set GC_savhbp to one before the first block that's */
	    /* in range, so GC_allochblk starts its search there.        */
	    prevhbp = 0;
	    hbp = GC_hblkfreelist;
	    while(hbp != 0 && !in_range((struct obj *)hbp)) {
		prevhbp = hbp;
		hbp = hb_next(hbp);
	    }
	    GC_savhbp = prevhbp;
	    /* Keep allocating while the heap can still grow into range, */
	    /* or a sufficiently large free block remains.               */
	    while (((struct obj *)GC_heaplim) <= ub || GC_sufficient_hb(lw)) {
		hbp = GC_allochblk(lw);
		if (hbp == 0) {
		    /* Out of heap blocks; result is still 0 from its */
		    /* initializer, so this reports failure (NIL).    */
		    goto done;
		}
		GC_add_hblklist(hbp);
		op = (struct obj *) (hbp -> hb_body);
		if (in_range(op)) {
		    result = op;
		    *size = WORDS_TO_BYTES(lw);
		    goto done;
		} else {
		    /* Out of range: chain it onto the garbage list so */
		    /* it is returned to the allocator at done:.       */
		    op -> obj_component[0] = (word)garbage;
		    garbage = op;
		}
	    }
	}
	if (GC_gc_no == initial_gc_no) {
	    /* No collection has happened since we started: force a full */
	    /* collection, then (outside the monitor) finish all pending */
	    /* reclamation so the free lists are fully populated before  */
	    /* we retry.                                                 */
	    GC_demand_full_and_wait();
	    XR_MonitorExit(&GC_allocate_ml);
	    {
		register struct hblk ** rlp;

		for( rlp = reclaim_list; rlp < &reclaim_list[MAXOBJSZ+1]; rlp++ ) {
		    while (*rlp != (struct hblk *)0) {
			GC_reclaim_composite(rlp);
		    }
		}
		for( rlp = areclaim_list; rlp < &areclaim_list[MAXOBJSZ+1]; rlp++ ) {
		    while (*rlp != (struct hblk *)0) {
			GC_reclaim_atomic(rlp);
		    }
		}
	    }
	    XR_MonitorEntry(&GC_allocate_ml);
	} else {
	    /* A collection already intervened and we still failed; */
	    /* give up rather than loop forever.                    */
	    break;
	}
    }
    result = 0;
done:
    XR_MonitorExit(&GC_allocate_ml);
    /* Delete unused heap blocks */
    while (garbage != (struct obj *)0) {
	op = garbage;
	garbage = (struct obj *)(garbage -> obj_component[0]);
	GC_free(op);
    }
    return((XR_Pointer)result);
}